In [1]:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import plotly.express as px
import statsmodels.api as sm
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from statsmodels.tsa.stattools import grangercausalitytests
from statsmodels.tsa.stattools import coint
import scipy
from itertools import permutations
In [2]:
def calculate_ols_trendline(y_values):
    x_values = np.arange(len(y_values))
    x_values = sm.add_constant(x_values)
    model = sm.OLS(y_values, x_values, missing='drop').fit()
    trendline = model.predict(x_values)
    return trendline
In [3]:
def test_granger_causality(data_frame, column1, column2, max_lags=5):

    # Replace inf values with NaN and drop rows with NaN values
    data_frame = data_frame[[column1, column2]].replace([np.inf, -np.inf], np.nan).dropna()

    # Perform the Granger causality test
    test_result = grangercausalitytests(data_frame[[column1, column2]],max_lags, verbose=False)
    
    print("Granger Causality Test Results:\n")
    for lag in range(1, max_lags+1):
        f_test = test_result[lag][0]['ssr_ftest']
        chi2_test = test_result[lag][0]['ssr_chi2test']
        lr_test = test_result[lag][0]['lrtest']
        params_ftest = test_result[lag][0]['params_ftest']
        
        print(f"Lag {lag}:")
        print(f"  F-test: Statistic = {f_test[0]}, P-value = {f_test[1]}")
        print(f"  Chi-squared test: Statistic = {chi2_test[0]}, P-value = {chi2_test[1]}")
        print(f"  Likelihood-ratio test: Statistic = {lr_test[0]}, P-value = {lr_test[1]}")
        print(f"  Params F-test: Statistic = {params_ftest[0]}, P-value = {params_ftest[1]}\n")
    
    return test_result
In [4]:
from itertools import permutations
import pandas as pd

def test_all_pairs_granger_causality(data_frame, max_lags=5):
    results = []
    columns = data_frame.columns
    for col1, col2 in permutations(columns, 2):
        data = data_frame[[col1, col2]].replace([np.inf, -np.inf], np.nan).dropna()
        test_result = grangercausalitytests(data, max_lags, verbose=False)
        
        for lag in range(1, max_lags+1):
            f_test = test_result[lag][0]['ssr_ftest']
            result = {
                'Causality Vallues': f'{col1}/{col2}',
                'Lag': lag,
                'F-Statistic': f_test[0],
                'P-value': f_test[1],
                'Significant': 'Yes' if f_test[1] < 0.05 else 'No'
            }
            results.append(result)
        
    granger_results_df = pd.DataFrame(results)
    return granger_results_df
In [5]:
def t_test(data_frame, column1, column2, equal_var=False):
    
    # Replace inf values with NaN and drop rows with NaN values
    data_frame = data_frame[[column1, column2]].replace([np.inf, -np.inf], np.nan).dropna()
    
    f_statistic, p_value = scipy.stats.ttest_ind(data_frame[column1], data_frame[column2])
    
    if p_value < 0.05:
        print("There is a significant difference between the groups.")
    else:
        print("No significant difference found between the groups.")
        
    return f_statistic, p_value
In [6]:
def test_levene(data_frame, column1, column2):

    # Replace inf values with NaN and drop rows with NaN values
    data_frame = data_frame[[column1, column2]].replace([np.inf, -np.inf], np.nan).dropna()

    # Perform the levene test
    f_statistic, p_value = scipy.stats.levene(data_frame[column1], data_frame[column2] , center='median', proportiontocut=0.05)
    
    print(f"Levene Test F-statistic: {f_statistic}")
    print(f"P-value: {p_value}")
    
    if p_value < 0.05:
        print("There is a significant difference between the groups.")
    else:
        print("No significant difference found between the groups.")

        
    return f_statistic, p_value
In [7]:
def test_anova(data_frame, columns,use_var='unequal',welch_correction=True):
    
    data_frame = data_frame[columns].replace([np.inf, -np.inf], np.nan).dropna()
    groups = [data_frame[col] for col in columns]
    f_statistic, p_value = scipy.stats.f_oneway(*groups)
    
    print(f"ANOVA Test F-statistic: {f_statistic}")
    print(f"P-value: {p_value}")
    
    if p_value < 0.05:
        print("There is a significant difference between the groups.")
    else:
        print("No significant difference found between the groups.")


    return f_statistic, p_value
In [8]:
def test_coint(data_frame, column1, column2):

    # Replace inf values with NaN and drop rows with NaN values
    data_frame = data_frame[[column1, column2]].replace([np.inf, -np.inf], np.nan).dropna()

    # Perform the Cointegration test
    score, p_value, crit_values =  coint(data_frame[column1], data_frame[column2])
    
    print(f"Cointegration Test Statistic: {score}")
    print(f"P-value: {p_value}")
    if p_value < 0.05:
        print("The series are likely cointegrated.")
    else:
        print("No evidence of cointegration.")
    
    return score, p_value, crit_values
In [9]:
def test_all_pairs_coint(data_frame):
    results = []
    columns = data_frame.columns
    for col1, col2 in permutations(columns, 2):
        data = data_frame[[col1, col2]].replace([np.inf, -np.inf], np.nan).dropna()
        if col1 == col2:
            continue
        else: 
            score, p_value, crit_values = test_coint(data_frame, col1, col2)
            result = {
            'Score': f'{col1}/{col2}',
            'Test Statistic': score,
            'P-value': p_value,
            'Cointegrated': 'Yes' if p_value < 0.05 else 'No'
            }
            results.append(result)
        
    coint_matrix = pd.DataFrame(results)
    
    return coint_matrix
       

Data set import and cleaning

In [10]:
euro = pd.read_csv('euro 5 years.csv')
bitcoin = pd.read_csv('BTC-USD 5 years.csv')
etherium = pd.read_csv('ETH-USD 5 years.csv')
sp = pd.read_csv('S&P 500 (SPX) Historical Data NASDAQ.csv')
cac = pd.read_csv('cac 40 5 years.csv')
fts = pd.read_csv('FTSE 100 5 years.csv')
pound = pd.read_csv('GBP_USD 5 years.csv')
In [11]:
fts[['Open', 'High', 'Low']] = fts[['Open', 'High', 'Low']].replace(',', '', regex=True).astype('float64')
cac[['Open', 'High', 'Low']] = cac[[' Open', ' High', ' Low']].replace(',', '', regex=True).astype('float64')

Function : convert same format Date, calculate the mean, concact the mean vallue in a new Data Frame and replace the NaN with linar interpolation methode

In [12]:
def process_dataframes(dataframes, date_formats):
    processed_dataframes = {}


    for name, df in dataframes.items():
       
        #Convert the Data set in same format and thest the Date in index
        df['Date'] = pd.to_datetime(df['Date'], format=date_formats.get(name), errors='coerce')
        df.set_index('Date', inplace=True)

        # Calculate mean prices
        mean_col_name = f'Mean Price {name.capitalize()}'
        df[mean_col_name] = df[['Open', 'High', 'Low']].mean(axis=1)
        processed_dataframes[name] = df[mean_col_name]

    # Concatenate mean prices in new DataFrame
    all_data_mean = pd.concat(processed_dataframes.values(), axis=1)
    all_data_mean.columns = processed_dataframes.keys()
    
    #Convert CAC 40 and FTSE 100 in dolars and replace the actual vallue
    all_data_mean['cac'] = all_data_mean['cac'] * all_data_mean['euro']
    all_data_mean['fts'] = all_data_mean['fts'] * all_data_mean['pound']
    
    #Remove the remplace vallue with interpolate function
    all_data_mean = all_data_mean[all_data_mean.columns].interpolate(method= 'linear' )



    return all_data_mean

dataframes = {'euro': euro, 'bitcoin': bitcoin, 'etherium': etherium, 'fts': fts, 'pound': pound, 
              'sp': sp, 'cac': cac}
date_formats = {'euro': None, 'bitcoint': None, 'etherium': None, 'fts': '%d/%m/%Y', 
                'pound': '%d/%m/%Y', 'sp': '%m/%d/%Y', 'cac': '%m/%d/%y'}


all_data_mean = process_dataframes(dataframes, date_formats)
In [13]:
all_data_mean.columns
Out[13]:
Index(['euro', 'bitcoin', 'etherium', 'fts', 'pound', 'sp', 'cac'], dtype='object')
In [14]:
all_data_mean.info()
<class 'pandas.core.frame.DataFrame'>
DatetimeIndex: 1891 entries, 2018-09-12 to 2023-12-11
Data columns (total 7 columns):
 #   Column    Non-Null Count  Dtype  
---  ------    --------------  -----  
 0   euro      1827 non-null   float64
 1   bitcoin   1827 non-null   float64
 2   etherium  1827 non-null   float64
 3   fts       1827 non-null   float64
 4   pound     1827 non-null   float64
 5   sp        1830 non-null   float64
 6   cac       1827 non-null   float64
dtypes: float64(7)
memory usage: 118.2 KB

Bitcoint/Etherium analyse¶

In [15]:
all_data_mean[['bitcoin', 'etherium']].corr(method= 'spearman')
Out[15]:
bitcoin etherium
bitcoin 1.000000 0.953484
etherium 0.953484 1.000000
In [16]:
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(20, 15), sharex=False)

# Plot the mean price of Bitcoin
all_data_mean['bitcoin'].plot(ax=ax1, fontsize=16, color='blue')
ax1.set_ylabel('Mean Price Bitcoin')
ax1.set_title('Mean Prices of Bitcoin and Ethereum with Rolling Correlation')

# Plot the mean price of Ethereum
all_data_mean['etherium'].plot(ax=ax2, fontsize=16, color='orange')
ax2.set_ylabel('Mean Price Ethereum')

# Calculate and plot the rolling correlation in the third subplot
rolling_corr = all_data_mean['etherium'].rolling(30).corr(all_data_mean['bitcoin'])
rolling_corr.plot(ax=ax3, fontsize=16, color='green')
ax3.set_ylabel('Rolling Correlation')
ax3.axhline(y=0, color='red', linestyle='--', linewidth=2)

# Add legend to the third subplot
ax1.legend(['Bitcoin Mean Price'])
ax2.legend(['Bitcoin Mean Etherium'])
ax3.legend(['Rolling Correlation BTC/ETH'])

# Use the seaborn style
plt.style.use('seaborn-v0_8-colorblind')

plt.savefig('bitcoin_etheriun.jpeg')

# Show the plots
plt.show()
No description has been provided for this image

Make the Granger Causality score beetwenn Bitcoin and Ehterium

In [17]:
test_granger_causality(all_data_mean, 'bitcoin', 'etherium', max_lags=5)
Granger Causality Test Results:

Lag 1:
  F-test: Statistic = 20.47931250867985, P-value = 6.417180591997906e-06
  Chi-squared test: Statistic = 20.513014065194408, P-value = 5.922719066664915e-06
  Likelihood-ratio test: Statistic = 20.398649686034332, P-value = 6.2874143889837596e-06
  Params F-test: Statistic = 20.479312508695152, P-value = 6.417180591948046e-06

Lag 2:
  F-test: Statistic = 11.217305932888081, P-value = 1.4393517271063477e-05
  Chi-squared test: Statistic = 22.496245414857967, P-value = 1.3031739092009448e-05
  Likelihood-ratio test: Statistic = 22.358722057273553, P-value = 1.395934992576251e-05
  Params F-test: Statistic = 11.217305932889026, P-value = 1.4393517271049108e-05

Lag 3:
  F-test: Statistic = 10.03932170550651, P-value = 1.4631471854955786e-06
  Chi-squared test: Statistic = 30.233994701448328, P-value = 1.2321700321788243e-06
  Likelihood-ratio test: Statistic = 29.98615554979915, P-value = 1.3893420550221234e-06
  Params F-test: Statistic = 10.039321705508376, P-value = 1.4631471854918153e-06

Lag 4:
  F-test: Statistic = 6.938798500217844, P-value = 1.5313121221361458e-05
  Chi-squared test: Statistic = 27.89289893251848, P-value = 1.3112092151438257e-05
  Likelihood-ratio test: Statistic = 27.681662576447707, P-value = 1.4469768221310347e-05
  Params F-test: Statistic = 6.938798500217278, P-value = 1.5313121221377616e-05

Lag 5:
  F-test: Statistic = 5.6521512378289955, P-value = 3.533789083992374e-05
  Chi-squared test: Statistic = 28.432411803767064, P-value = 2.9956599752444827e-05
  Likelihood-ratio test: Statistic = 28.212848370159918, P-value = 3.307219407698228e-05
  Params F-test: Statistic = 5.652151237829557, P-value = 3.533789083988168e-05

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning: verbose is deprecated since functions should not print results
  warnings.warn(
Out[17]:
{1: ({'ssr_ftest': (20.47931250867985, 6.417180591997906e-06, 1823.0, 1),
   'ssr_chi2test': (20.513014065194408, 5.922719066664915e-06, 1),
   'lrtest': (20.398649686034332, 6.2874143889837596e-06, 1),
   'params_ftest': (20.479312508695152, 6.417180591948046e-06, 1823.0, 1.0)},
  [<statsmodels.regression.linear_model.RegressionResultsWrapper at 0x1329071d0>,
   <statsmodels.regression.linear_model.RegressionResultsWrapper at 0x13297b990>,
   array([[0., 1., 0.]])]),
 2: ({'ssr_ftest': (11.217305932888081, 1.4393517271063477e-05, 1820.0, 2),
   'ssr_chi2test': (22.496245414857967, 1.3031739092009448e-05, 2),
   'lrtest': (22.358722057273553, 1.395934992576251e-05, 2),
   'params_ftest': (11.217305932889026, 1.4393517271049108e-05, 1820.0, 2.0)},
  [<statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132979490>,
   <statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132988210>,
   array([[0., 0., 1., 0., 0.],
          [0., 0., 0., 1., 0.]])]),
 3: ({'ssr_ftest': (10.03932170550651, 1.4631471854955786e-06, 1817.0, 3),
   'ssr_chi2test': (30.233994701448328, 1.2321700321788243e-06, 3),
   'lrtest': (29.98615554979915, 1.3893420550221234e-06, 3),
   'params_ftest': (10.039321705508376, 1.4631471854918153e-06, 1817.0, 3.0)},
  [<statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132937110>,
   <statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132937e50>,
   array([[0., 0., 0., 1., 0., 0., 0.],
          [0., 0., 0., 0., 1., 0., 0.],
          [0., 0., 0., 0., 0., 1., 0.]])]),
 4: ({'ssr_ftest': (6.938798500217844, 1.5313121221361458e-05, 1814.0, 4),
   'ssr_chi2test': (27.89289893251848, 1.3112092151438257e-05, 4),
   'lrtest': (27.681662576447707, 1.4469768221310347e-05, 4),
   'params_ftest': (6.938798500217278, 1.5313121221377616e-05, 1814.0, 4.0)},
  [<statsmodels.regression.linear_model.RegressionResultsWrapper at 0x13297ce50>,
   <statsmodels.regression.linear_model.RegressionResultsWrapper at 0x13297e390>,
   array([[0., 0., 0., 0., 1., 0., 0., 0., 0.],
          [0., 0., 0., 0., 0., 1., 0., 0., 0.],
          [0., 0., 0., 0., 0., 0., 1., 0., 0.],
          [0., 0., 0., 0., 0., 0., 0., 1., 0.]])]),
 5: ({'ssr_ftest': (5.6521512378289955, 3.533789083992374e-05, 1811.0, 5),
   'ssr_chi2test': (28.432411803767064, 2.9956599752444827e-05, 5),
   'lrtest': (28.212848370159918, 3.307219407698228e-05, 5),
   'params_ftest': (5.652151237829557, 3.533789083988168e-05, 1811.0, 5.0)},
  [<statsmodels.regression.linear_model.RegressionResultsWrapper at 0x1328a2310>,
   <statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132945710>,
   array([[0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
          [0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
          [0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
          [0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.],
          [0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.]])])}

Caluclate the Covariance vallue for now if I can run a classic T-test or a Walsh t-test

In [18]:
plt.figure(figsize=(8, 6))
sns.heatmap(all_data_mean[['bitcoin', 'etherium']].cov(), annot=True)
plt.title('Heatmap Covariance Bitcoin - Etherium')
Out[18]:
Text(0.5, 1.0, 'Heatmap Covariance Bitcoin - Etherium')
No description has been provided for this image

Calculate the cointegration score for Bitcoin and Etherium

In [19]:
test_coint(all_data_mean, 'bitcoin', 'etherium')
Cointegration Test Statistic: -3.0553828789607036
P-value: 0.0976203440200703
No evidence of cointegration.
Out[19]:
(-3.0553828789607036,
 0.0976203440200703,
 array([-3.90244781, -3.33947821, -3.04677349]))

Because the covaraince are not equal i made a Welch t-test

In [20]:
t_test(all_data_mean, 'bitcoin', 'etherium', equal_var=False)
There is a significant difference between the groups.
Out[20]:
(60.30652273568337, 0.0)

I compare the mean vallue of Bitcoin and Ehterium

In [21]:
all_data_mean['bitcoin'].mean()
Out[21]:
24124.186876242842
In [22]:
all_data_mean['etherium'].mean()
Out[22]:
1382.9994836761541

Analyse S&P/CAC/FTSE¶

Make the Granger Causality score beetwenn S&P/CAC4/FTSE 100

In [23]:
test_granger_causality(all_data_mean, 'sp', 'fts', max_lags=5)
Granger Causality Test Results:

Lag 1:
  F-test: Statistic = 4.4752612222148604, P-value = 0.03452424709131348
  Chi-squared test: Statistic = 4.482625886870178, P-value = 0.034241073415757405
  Likelihood-ratio test: Statistic = 4.4771327023954655, P-value = 0.03435130546540045
  Params F-test: Statistic = 4.475261222213212, P-value = 0.03452424709134593

Lag 2:
  F-test: Statistic = 4.200003686788619, P-value = 0.015141119632002667
  Chi-squared test: Statistic = 8.423084316911241, P-value = 0.014823490530755106
  Likelihood-ratio test: Statistic = 8.403706015677017, P-value = 0.014967815627883642
  Params F-test: Statistic = 4.2000036867881185, P-value = 0.015141119632008341

Lag 3:
  F-test: Statistic = 8.699830484802078, P-value = 9.907916896910488e-06
  Chi-squared test: Statistic = 26.20003985296476, P-value = 8.660473619241372e-06
  Likelihood-ratio test: Statistic = 26.013653146786964, P-value = 9.474835430997154e-06
  Params F-test: Statistic = 8.699830484801922, P-value = 9.907916896912548e-06

Lag 4:
  F-test: Statistic = 6.4403730897738605, P-value = 3.8198418902414176e-05
  Chi-squared test: Statistic = 25.88930571699613, P-value = 3.331320372372934e-05
  Likelihood-ratio test: Statistic = 25.707194574322784, P-value = 3.625068224360214e-05
  Params F-test: Statistic = 6.440373089774692, P-value = 3.8198418902356775e-05

Lag 5:
  F-test: Statistic = 5.797266685456754, P-value = 2.5602537946846245e-05
  Chi-squared test: Statistic = 29.162396192441207, P-value = 2.1547085647819246e-05
  Likelihood-ratio test: Statistic = 28.931474606899428, P-value = 2.3916462740999062e-05
  Params F-test: Statistic = 5.797266685455616, P-value = 2.5602537946911734e-05

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning: verbose is deprecated since functions should not print results
  warnings.warn(
Out[23]:
{1: ({'ssr_ftest': (4.4752612222148604, 0.03452424709131348, 1823.0, 1),
   'ssr_chi2test': (4.482625886870178, 0.034241073415757405, 1),
   'lrtest': (4.4771327023954655, 0.03435130546540045, 1),
   'params_ftest': (4.475261222213212, 0.03452424709134593, 1823.0, 1.0)},
  [<statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a5ed90>,
   <statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a47650>,
   array([[0., 1., 0.]])]),
 2: ({'ssr_ftest': (4.200003686788619, 0.015141119632002667, 1820.0, 2),
   'ssr_chi2test': (8.423084316911241, 0.014823490530755106, 2),
   'lrtest': (8.403706015677017, 0.014967815627883642, 2),
   'params_ftest': (4.2000036867881185, 0.015141119632008341, 1820.0, 2.0)},
  [<statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a68650>,
   <statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a6a650>,
   array([[0., 0., 1., 0., 0.],
          [0., 0., 0., 1., 0.]])]),
 3: ({'ssr_ftest': (8.699830484802078, 9.907916896910488e-06, 1817.0, 3),
   'ssr_chi2test': (26.20003985296476, 8.660473619241372e-06, 3),
   'lrtest': (26.013653146786964, 9.474835430997154e-06, 3),
   'params_ftest': (8.699830484801922, 9.907916896912548e-06, 1817.0, 3.0)},
  [<statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a69b90>,
   <statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a6bdd0>,
   array([[0., 0., 0., 1., 0., 0., 0.],
          [0., 0., 0., 0., 1., 0., 0.],
          [0., 0., 0., 0., 0., 1., 0.]])]),
 4: ({'ssr_ftest': (6.4403730897738605, 3.8198418902414176e-05, 1814.0, 4),
   'ssr_chi2test': (25.88930571699613, 3.331320372372934e-05, 4),
   'lrtest': (25.707194574322784, 3.625068224360214e-05, 4),
   'params_ftest': (6.440373089774692, 3.8198418902356775e-05, 1814.0, 4.0)},
  [<statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a69dd0>,
   <statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a6b010>,
   array([[0., 0., 0., 0., 1., 0., 0., 0., 0.],
          [0., 0., 0., 0., 0., 1., 0., 0., 0.],
          [0., 0., 0., 0., 0., 0., 1., 0., 0.],
          [0., 0., 0., 0., 0., 0., 0., 1., 0.]])]),
 5: ({'ssr_ftest': (5.797266685456754, 2.5602537946846245e-05, 1811.0, 5),
   'ssr_chi2test': (29.162396192441207, 2.1547085647819246e-05, 5),
   'lrtest': (28.931474606899428, 2.3916462740999062e-05, 5),
   'params_ftest': (5.797266685455616, 2.5602537946911734e-05, 1811.0, 5.0)},
  [<statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a6b250>,
   <statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a71290>,
   array([[0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
          [0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
          [0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
          [0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.],
          [0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.]])])}
In [24]:
test_granger_causality(all_data_mean, 'sp', 'cac', max_lags=5)
Granger Causality Test Results:

Lag 1:
  F-test: Statistic = 0.006372709121573145, P-value = 0.9363818225358022
  Chi-squared test: Statistic = 0.0063831963005993204, P-value = 0.9363208398303555
  Likelihood-ratio test: Statistic = 0.006383185143931769, P-value = 0.9363208953618748
  Params F-test: Statistic = 0.006372709120115911, P-value = 0.936381822542859

Lag 2:
  F-test: Statistic = 10.515415139707347, P-value = 2.88003150499978e-05
  Chi-squared test: Statistic = 21.088607285676826, P-value = 2.634311406754158e-05
  Likelihood-ratio test: Statistic = 20.967694200826372, P-value = 2.7984854624116913e-05
  Params F-test: Statistic = 10.515415139708505, P-value = 2.8800315049965315e-05

Lag 3:
  F-test: Statistic = 15.280736532302713, P-value = 8.044615755909477e-10
  Chi-squared test: Statistic = 46.01881689860233, P-value = 5.619478353774422e-10
  Likelihood-ratio test: Statistic = 45.44788145766506, P-value = 7.431450464018619e-10
  Params F-test: Statistic = 15.280736532302742, P-value = 8.044615755908848e-10

Lag 4:
  F-test: Statistic = 11.262497538874971, P-value = 4.992873681567404e-09
  Chi-squared test: Statistic = 45.273501683283506, P-value = 3.4879624313124328e-09
  Likelihood-ratio test: Statistic = 44.720464320038445, P-value = 4.5451865559232495e-09
  Params F-test: Statistic = 11.262497538875659, P-value = 4.992873681561001e-09

Lag 5:
  F-test: Statistic = 9.954791992402805, P-value = 2.074994713517156e-09
  Chi-squared test: Statistic = 50.07628661004393, P-value = 1.336868223077017e-09
  Likelihood-ratio test: Statistic = 49.40048686202499, P-value = 1.83795015199251e-09
  Params F-test: Statistic = 9.954791992402864, P-value = 2.0749947135167433e-09

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning: verbose is deprecated since functions should not print results
  warnings.warn(
Out[24]:
{1: ({'ssr_ftest': (0.006372709121573145, 0.9363818225358022, 1823.0, 1),
   'ssr_chi2test': (0.0063831963005993204, 0.9363208398303555, 1),
   'lrtest': (0.006383185143931769, 0.9363208953618748, 1),
   'params_ftest': (0.006372709120115911, 0.936381822542859, 1823.0, 1.0)},
  [<statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a72150>,
   <statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a72910>,
   array([[0., 1., 0.]])]),
 2: ({'ssr_ftest': (10.515415139707347, 2.88003150499978e-05, 1820.0, 2),
   'ssr_chi2test': (21.088607285676826, 2.634311406754158e-05, 2),
   'lrtest': (20.967694200826372, 2.7984854624116913e-05, 2),
   'params_ftest': (10.515415139708505, 2.8800315049965315e-05, 1820.0, 2.0)},
  [<statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a71e10>,
   <statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a71c90>,
   array([[0., 0., 1., 0., 0.],
          [0., 0., 0., 1., 0.]])]),
 3: ({'ssr_ftest': (15.280736532302713, 8.044615755909477e-10, 1817.0, 3),
   'ssr_chi2test': (46.01881689860233, 5.619478353774422e-10, 3),
   'lrtest': (45.44788145766506, 7.431450464018619e-10, 3),
   'params_ftest': (15.280736532302742, 8.044615755908848e-10, 1817.0, 3.0)},
  [<statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a719d0>,
   <statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a71a50>,
   array([[0., 0., 0., 1., 0., 0., 0.],
          [0., 0., 0., 0., 1., 0., 0.],
          [0., 0., 0., 0., 0., 1., 0.]])]),
 4: ({'ssr_ftest': (11.262497538874971, 4.992873681567404e-09, 1814.0, 4),
   'ssr_chi2test': (45.273501683283506, 3.4879624313124328e-09, 4),
   'lrtest': (44.720464320038445, 4.5451865559232495e-09, 4),
   'params_ftest': (11.262497538875659, 4.992873681561001e-09, 1814.0, 4.0)},
  [<statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a73810>,
   <statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a73c10>,
   array([[0., 0., 0., 0., 1., 0., 0., 0., 0.],
          [0., 0., 0., 0., 0., 1., 0., 0., 0.],
          [0., 0., 0., 0., 0., 0., 1., 0., 0.],
          [0., 0., 0., 0., 0., 0., 0., 1., 0.]])]),
 5: ({'ssr_ftest': (9.954791992402805, 2.074994713517156e-09, 1811.0, 5),
   'ssr_chi2test': (50.07628661004393, 1.336868223077017e-09, 5),
   'lrtest': (49.40048686202499, 1.83795015199251e-09, 5),
   'params_ftest': (9.954791992402864, 2.0749947135167433e-09, 1811.0, 5.0)},
  [<statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a80450>,
   <statsmodels.regression.linear_model.RegressionResultsWrapper at 0x132a80790>,
   array([[0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
          [0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
          [0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
          [0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.],
          [0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.]])])}

Create a heatmap with the correlation score and a line plot of the 3 vallue for see the evolution during the time.

In [25]:
plt.figure(figsize=(8, 6))
sns.heatmap(all_data_mean[['cac', 'fts','sp']].corr(), annot=True, cmap='coolwarm')
plt.title('Heatmap Correlation CAC 40/S&P/FTSE 100')
Out[25]:
Text(0.5, 1.0, 'Heatmap Correlation CAC 40/S&P/FTSE 100')
No description has been provided for this image
In [26]:
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(20, 15))

# Plot for 'Mean Price CAC 40'
all_data_mean['cac'].plot(ax=ax1,fontsize=16, color='blue')
ax1.set_title('Mean Price CAC 40')
ax1.legend()

# Plot for 'Mean Price FTSE 100'
all_data_mean['fts'].plot(ax=ax2, fontsize=16, color='orange')
ax2.set_title('Mean Price FTSE 100')
ax2.legend()

# Plot for 'Mean Price S&P 500'
all_data_mean['sp'].plot(ax=ax3,fontsize=16 , color='green')
ax3.set_title('Mean Price S&P 500')
ax3.legend()
Out[26]:
<matplotlib.legend.Legend at 0x132bbca10>
No description has been provided for this image

I have create a Correlation line wiht statsmodels for visualise the mean evolution of the correaltion score during time.

In [27]:
corr_ftse_cac = all_data_mean['fts'].rolling(30).corr(all_data_mean['cac'])
corr_ftse_sp = all_data_mean['fts'].rolling(30).corr(all_data_mean['sp'])
corr_cac_sp = all_data_mean['cac'].rolling(30).corr(all_data_mean['sp'])

fig = make_subplots(rows=3, cols=1)

corr_names = ["FTSE 100 vs CAC 40", "FTSE 100 vs S&P 500", "CAC 40 vs S&P 500"]


# Add traces and trendlines
for i, (corr, name) in enumerate(zip([corr_ftse_cac, corr_ftse_sp, corr_cac_sp], corr_names), 1):
    fig.add_trace(go.Scatter(x=all_data_mean.index, y=corr, mode='lines', name=name), row=i, col=1)
    fig.add_trace(go.Scatter(x=all_data_mean.index, y=calculate_ols_trendline(corr), mode='lines', name=f'{name} Trendline', line=dict(color='red')), row=i, col=1)

fig.update_layout(title='Rolling Correlations Over Time with OLS Trendlines', height=600, showlegend=True)
fig.update_xaxes(title_text='Date', row=3, col=1)
fig.update_yaxes(title_text='Correlation')

fig.show()

Like for bitcoint I have create a covarianse heatmap for now if i can use the clasic Anova test or the Welch variation.

In [28]:
plt.figure(figsize=(8, 6))
sns.heatmap(all_data_mean[['sp', 'fts','cac']].cov(), annot=True)
plt.title('Heatmap Covariance CAC 40/S&P/FTSE 100')
Out[28]:
Text(0.5, 1.0, 'Heatmap Covariance CAC 40/S&P/FTSE 100')
No description has been provided for this image
In [29]:
test_anova(all_data_mean, ['fts', 'cac','sp'],use_var='unequal',welch_correction=True)
ANOVA Test F-statistic: 20623.063222595832
P-value: 0.0
There is a significant difference between the groups.
Out[29]:
(20623.063222595832, 0.0)

To be sure of my Anova result I run three Welsh t-test to see if the result are the same

In [30]:
t_test(all_data_mean, 'sp', 'cac', equal_var=False)
There is a significant difference between the groups.
Out[30]:
(-115.00599433497874, 0.0)
In [31]:
t_test(all_data_mean, 'fts', 'cac', equal_var=False)
There is a significant difference between the groups.
Out[31]:
(80.04109944160375, 0.0)
In [32]:
t_test(all_data_mean, 'fts', 'sp', equal_var=False)
There is a significant difference between the groups.
Out[32]:
(224.8170799064647, 0.0)

I use the Levene test to compare the Variance betwen the group and validate my Covariance tab result

In [33]:
test_levene(all_data_mean, 'fts', 'cac')
Levene Test F-statistic: 157.07260627285507
P-value: 2.603245681724523e-35
There is a significant difference between the groups.
Out[33]:
(157.07260627285507, 2.603245681724523e-35)
In [34]:
test_levene(all_data_mean, 'fts', 'sp')
Levene Test F-statistic: 4.787779895620347
P-value: 0.02872550082027481
There is a significant difference between the groups.
Out[34]:
(4.787779895620347, 0.02872550082027481)
In [35]:
test_levene(all_data_mean, 'sp', 'cac')
Levene Test F-statistic: 340.6675502553561
P-value: 8.48238533801609e-73
There is a significant difference between the groups.
Out[35]:
(340.6675502553561, 8.48238533801609e-73)

Analyse all market together¶

For this part I have make scatter matrix for compare the varaition of all data together and a heat map for compare the correlation score.

In [36]:
sns.pairplot(all_data_mean,  kind='reg', diag_kind= 'kde', plot_kws=dict(marker="+", line_kws={'color': 'red'}))
Out[36]:
<seaborn.axisgrid.PairGrid at 0x133501a50>
No description has been provided for this image
In [37]:
sns.pairplot(all_data_mean.pct_change(),  kind='reg', diag_kind= 'kde', plot_kws=dict(marker="+", line_kws={'color': 'red'}))
Out[37]:
<seaborn.axisgrid.PairGrid at 0x136586c50>
No description has been provided for this image
In [38]:
plt.figure(figsize=(8, 6))
sns.heatmap(all_data_mean.corr(), annot=True, cmap='coolwarm')
plt.title('Heatmap Correlation')
Out[38]:
Text(0.5, 1.0, 'Heatmap Correlation')
No description has been provided for this image
In [39]:
plt.figure(figsize=(8, 6))
sns.heatmap(all_data_mean.pct_change().corr(), annot=True, cmap='coolwarm')
plt.title('Heatmap Correlation Percentage Variation')
Out[39]:
Text(0.5, 1.0, 'Heatmap Correlation Percentage Variation')
No description has been provided for this image

I make Cointegration test and Grange for compare all the data together and create a scatter plot matrix to visuliase the result

In [40]:
coint_matrix = test_all_pairs_coint(all_data_mean)
coint_matrix.sort_values(by='P-value')
Cointegration Test Statistic: -1.3676540416643885
P-value: 0.8083853324902368
No evidence of cointegration.
Cointegration Test Statistic: -1.3081862910236866
P-value: 0.8272517835050854
No evidence of cointegration.
Cointegration Test Statistic: -1.2369247376622878
P-value: 0.8479649886597967
No evidence of cointegration.
Cointegration Test Statistic: -2.8199407444638216
P-value: 0.15942239182810208
No evidence of cointegration.
Cointegration Test Statistic: -1.3339701044810772
P-value: 0.8192497775552554
No evidence of cointegration.
Cointegration Test Statistic: -1.222632319803945
P-value: 0.8518745535910225
No evidence of cointegration.
Cointegration Test Statistic: -1.531495279489717
P-value: 0.748917905786902
No evidence of cointegration.
Cointegration Test Statistic: -3.0553828789607036
P-value: 0.0976203440200703
No evidence of cointegration.
Cointegration Test Statistic: -1.944669111181519
P-value: 0.5571923696502682
No evidence of cointegration.
Cointegration Test Statistic: -1.7949259357730085
P-value: 0.6321834911480554
No evidence of cointegration.
Cointegration Test Statistic: -2.869771898342179
P-value: 0.1444908799944224
No evidence of cointegration.
Cointegration Test Statistic: -2.387101735398906
P-value: 0.3309880036139313
No evidence of cointegration.
Cointegration Test Statistic: -1.4147060534190505
P-value: 0.7924279671062884
No evidence of cointegration.
Cointegration Test Statistic: -2.9980842792915006
P-value: 0.11066156178941328
No evidence of cointegration.
Cointegration Test Statistic: -2.1384009367049948
P-value: 0.45631823803075483
No evidence of cointegration.
Cointegration Test Statistic: -1.4106569919342273
P-value: 0.7938369775111298
No evidence of cointegration.
Cointegration Test Statistic: -2.9526184810941474
P-value: 0.1219020413279614
No evidence of cointegration.
Cointegration Test Statistic: -2.612074522957029
P-value: 0.23199324253726483
No evidence of cointegration.
Cointegration Test Statistic: -2.8814001546353425
P-value: 0.14115313010195019
No evidence of cointegration.
Cointegration Test Statistic: -3.220780871053438
P-value: 0.06652076426886551
No evidence of cointegration.
Cointegration Test Statistic: -3.313328597109023
P-value: 0.05293022870087099
No evidence of cointegration.
Cointegration Test Statistic: -2.5412552357031335
P-value: 0.26118354181058945
No evidence of cointegration.
Cointegration Test Statistic: -2.6886720335495875
P-value: 0.20367929866742768
No evidence of cointegration.
Cointegration Test Statistic: -2.4198396558945814
P-value: 0.3155169274534616
No evidence of cointegration.
Cointegration Test Statistic: -3.124495326895609
P-value: 0.08348814008765171
No evidence of cointegration.
Cointegration Test Statistic: -2.29324845781286
P-value: 0.3769265425915912
No evidence of cointegration.
Cointegration Test Statistic: -2.1340895043688373
P-value: 0.4585659104168916
No evidence of cointegration.
Cointegration Test Statistic: -1.4616755667052028
P-value: 0.7755926623518101
No evidence of cointegration.
Cointegration Test Statistic: -1.913904321267141
P-value: 0.5729384036763813
No evidence of cointegration.
Cointegration Test Statistic: -1.7453903061198746
P-value: 0.6558487611115338
No evidence of cointegration.
Cointegration Test Statistic: -1.884489739694218
P-value: 0.5878523211374982
No evidence of cointegration.
Cointegration Test Statistic: -3.0335118900829254
P-value: 0.10245299511428724
No evidence of cointegration.
Cointegration Test Statistic: -3.0031632844551006
P-value: 0.10945549989677866
No evidence of cointegration.
Cointegration Test Statistic: -1.2891558552514526
P-value: 0.8329839553983623
No evidence of cointegration.
Cointegration Test Statistic: -1.192514683430678
P-value: 0.8598500923798182
No evidence of cointegration.
Cointegration Test Statistic: -2.0434239548137616
P-value: 0.5059446733025528
No evidence of cointegration.
Cointegration Test Statistic: -1.9445533332474105
P-value: 0.5572518775043169
No evidence of cointegration.
Cointegration Test Statistic: -2.872820102105986
P-value: 0.14361059433534662
No evidence of cointegration.
Cointegration Test Statistic: -2.9309700525501174
P-value: 0.1275390917727217
No evidence of cointegration.
Cointegration Test Statistic: -1.393225892908496
P-value: 0.7998257898892177
No evidence of cointegration.
Cointegration Test Statistic: -1.5065856745413173
P-value: 0.7586580902645625
No evidence of cointegration.
Cointegration Test Statistic: -2.227417747748132
P-value: 0.4102730813268515
No evidence of cointegration.
Out[40]:
Score Test Statistic P-value Cointegrated
20 fts/etherium -3.313329 0.052930 No
19 fts/bitcoin -3.220781 0.066521 No
24 pound/euro -3.124495 0.083488 No
7 bitcoin/etherium -3.055383 0.097620 No
31 sp/bitcoin -3.033512 0.102453 No
32 sp/etherium -3.003163 0.109455 No
13 etherium/bitcoin -2.998084 0.110662 No
16 etherium/sp -2.952618 0.121902 No
38 cac/etherium -2.930970 0.127539 No
18 fts/euro -2.881400 0.141153 No
37 cac/bitcoin -2.872820 0.143611 No
10 bitcoin/sp -2.869772 0.144491 No
3 euro/pound -2.819941 0.159422 No
22 fts/sp -2.688672 0.203679 No
17 etherium/cac -2.612075 0.231993 No
21 fts/pound -2.541255 0.261184 No
23 fts/cac -2.419840 0.315517 No
11 bitcoin/cac -2.387102 0.330988 No
25 pound/bitcoin -2.293248 0.376927 No
41 cac/sp -2.227418 0.410273 No
14 etherium/fts -2.138401 0.456318 No
26 pound/etherium -2.134090 0.458566 No
35 sp/cac -2.043424 0.505945 No
8 bitcoin/fts -1.944669 0.557192 No
36 cac/euro -1.944553 0.557252 No
28 pound/sp -1.913904 0.572938 No
30 sp/euro -1.884490 0.587852 No
9 bitcoin/pound -1.794926 0.632183 No
29 pound/cac -1.745390 0.655849 No
6 bitcoin/euro -1.531495 0.748918 No
40 cac/pound -1.506586 0.758658 No
27 pound/fts -1.461676 0.775593 No
12 etherium/euro -1.414706 0.792428 No
15 etherium/pound -1.410657 0.793837 No
39 cac/fts -1.393226 0.799826 No
0 euro/bitcoin -1.367654 0.808385 No
4 euro/sp -1.333970 0.819250 No
1 euro/etherium -1.308186 0.827252 No
33 sp/fts -1.289156 0.832984 No
2 euro/fts -1.236925 0.847965 No
5 euro/cac -1.222632 0.851875 No
34 sp/pound -1.192515 0.859850 No
In [41]:
granger = test_all_pairs_granger_causality(all_data_mean, max_lags=5)
/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

/Users/simongobin/anaconda3/lib/python3.11/site-packages/statsmodels/tsa/stattools.py:1488: FutureWarning:

verbose is deprecated since functions should not print results

In [42]:
fig= px.scatter(coint_matrix, x='Score',y='P-value', title="Cointvariance result")
fig.add_hline(y=0.05, line_dash="dash", line_color="red")
In [43]:
fig= px.scatter(granger, x='Causality Vallues',y='P-value', title="Granger causality result",color="Lag")
fig.add_hline(y=0.05, line_dash="dash", line_color="red")

I found a big cointegration score that mean a bit link between the Ehterium and FTSE 100 so i create a Plot line fro visulise this result

In [44]:
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(20, 15), sharex=False)


all_data_mean['etherium'].plot(ax=ax1, fontsize=16, color='blue')
ax1.set_ylabel('Mean Price Etherium')
ax1.set_title('Mean Prices of Etherium and FTSE 100 with Rolling Correlation')


all_data_mean['fts'].plot(ax=ax2, fontsize=16, color='orange')
ax2.set_ylabel('Mean Price FTS 100')


rolling_corr = all_data_mean['fts'].rolling(30).corr(all_data_mean['bitcoin'])
rolling_corr.plot(ax=ax3, fontsize=16, color='green')
ax3.set_ylabel('Rolling Correlation')
ax3.axhline(y=0, color='red', linestyle='--', linewidth=2)


ax1.legend(['Etherium Mean Price'])
ax2.legend(['FTSE 100 Mean Price'])
ax3.legend(['Rolling Correlation FTSE/BTC'])

plt.style.use('seaborn-v0_8-colorblind')
No description has been provided for this image

Majore variaiton¶

I have extract the 50 largest varaition in positive and 50 largest varaiton in negative and create a new datframe with this vallue

In [45]:
top_10_largest = all_data_mean.pct_change().apply(lambda col: col.nlargest(50))
top_10_smallest = all_data_mean.pct_change().apply(lambda col: col.nsmallest(50))
top10 = top_10_largest.copy()
top10_2=top_10_smallest.copy()
df_top = pd.concat([top10, top10_2])

I need to convert the new data frame in long fromat for create a historam with plotly

In [46]:
# Convert the DataFrame to a long format
df_long = df_top.reset_index().melt(id_vars='Date', var_name='Variable', value_name='Value')

# Create a scatter plot with a separate facet for each variable
fig = px.histogram(df_long, x='Date', y='Value', facet_row='Variable', title="Majore variation of price",nbins=260)

# Update layout for better readability
fig.update_layout(height=1200, showlegend=False)
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_yaxes(matches=None) 

fig.show()
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]: